return -EINVAL;
}
+ /* Paranoia: scrub DOM0's memory allocation. */
+ memset((void *)alloc_start, 0, alloc_end - alloc_start);
+
/* Construct a frame-allocation list for the initial domain. */
for ( mfn = (alloc_start>>PAGE_SHIFT);
mfn < (alloc_end>>PAGE_SHIFT);
* One bit per page of memory. Bit set => page is allocated.
*/
+static unsigned long bitmap_size; /* in bytes */
static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
unsigned long bitmap_start, unsigned long max_pages)
{
int i, j;
- unsigned long bitmap_size, bad_pfn;
+ unsigned long bad_pfn;
char *p;
memset(avail, 0, sizeof(avail));
}
+/*
+ * Scrub all unallocated pages in all heap zones. This function is more
+ * convoluted than appears necessary because we do not want to continuously
+ * hold the lock or disable interrupts while scrubbing very large memory areas.
+ */
+void scrub_heap_pages(void)
+{
+ void *p;
+ unsigned long pfn, flags;
+
+ for ( pfn = 0; pfn < (bitmap_size * 8); pfn++ )
+ {
+ /* Quick lock-free check. */
+ if ( allocated_in_map(pfn) )
+ continue;
+
+ spin_lock_irqsave(&heap_lock, flags);
+
+ /* Re-check page status with lock held. */
+ if ( !allocated_in_map(pfn) )
+ {
+ p = map_domain_mem(pfn << PAGE_SHIFT);
+ clear_page(p);
+ unmap_domain_mem(p);
+ }
+
+ spin_unlock_irqrestore(&heap_lock, flags);
+ }
+}
+
+
/*************************
* XEN-HEAP SUB-ALLOCATOR
void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
struct pfn_info *alloc_heap_pages(int zone, int order);
void free_heap_pages(int zone, struct pfn_info *pg, int order);
+void scrub_heap_pages(void);
/* Xen suballocator */
void init_xenheap_pages(unsigned long ps, unsigned long pe);